Workaround for CVE-2017-5715 on Cortex A57 and A72
authorDimitris Papastamos <[email protected]>
Thu, 30 Nov 2017 14:53:53 +0000 (14:53 +0000)
committerDimitris Papastamos <[email protected]>
Thu, 11 Jan 2018 10:26:15 +0000 (10:26 +0000)
Invalidate the Branch Target Buffer (BTB) on entry to EL3 by disabling
and enabling the MMU.  To achieve this without performing any branch
instruction, a per-cpu vbar is installed which executes the workaround
and then branches off to the corresponding vector entry in the main
vector table.  A side effect of this change is that the main vbar is
configured before any reset handling.  This is to allow the per-cpu
reset function to override the vbar setting.

This workaround is enabled by default on the affected CPUs.

Change-Id: I97788d38463a5840a410e3cea85ed297a1678265
Signed-off-by: Dimitris Papastamos <[email protected]>
bl31/aarch64/runtime_exceptions.S
bl31/bl31.mk
docs/cpu-specific-build-macros.rst
include/common/aarch64/el3_common_macros.S
lib/cpus/aarch64/cortex_a57.S
lib/cpus/aarch64/cortex_a72.S
lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S [new file with mode: 0644]
lib/cpus/cpu-ops.mk

index d8fbb9b218a7599d63177c1061aa3dd89b4c8c59..9b7735f1e4ba92dda57446d57b848aefbbc70688 100644 (file)
 
        .globl  runtime_exceptions
 
+       .globl  sync_exception_sp_el0
+       .globl  irq_sp_el0
+       .globl  fiq_sp_el0
+       .globl  serror_sp_el0
+
+       .globl  sync_exception_sp_elx
+       .globl  irq_sp_elx
+       .globl  fiq_sp_elx
+       .globl  serror_sp_elx
+
+       .globl  sync_exception_aarch64
+       .globl  irq_aarch64
+       .globl  fiq_aarch64
+       .globl  serror_aarch64
+
+       .globl  sync_exception_aarch32
+       .globl  irq_aarch32
+       .globl  fiq_aarch32
+       .globl  serror_aarch32
+
        /* ---------------------------------------------------------------------
         * This macro handles Synchronous exceptions.
         * Only SMC exceptions are supported.
index fdcc93139ad3202d2b8fa5dea88f813d589e5711..0732e05215e12542c6a636a012f031da7653fef0 100644 (file)
@@ -58,6 +58,10 @@ ifeq (${ENABLE_SVE_FOR_NS},1)
 BL31_SOURCES           +=      lib/extensions/sve/sve.c
 endif
 
+ifeq (${WORKAROUND_CVE_2017_5715},1)
+BL31_SOURCES           +=      lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
+endif
+
 BL31_LINKERFILE                :=      bl31/bl31.ld.S
 
 # Flag used to indicate if Crash reporting via console should be included
index f74b45933ecb51ce85ad31ce74b98d19747fac5a..014817d3982a3f3c5861540050c44c257e88f2b5 100644 (file)
@@ -11,6 +11,15 @@ This document describes the various build options present in the CPU specific
 operations framework to enable errata workarounds and to enable optimizations
 for a specific CPU on a platform.
 
+Security Vulnerability Workarounds
+----------------------------------
+
+ARM Trusted Firmware exports a series of build flags that control which
+security vulnerability workarounds should be applied at runtime.
+
+-  ``WORKAROUND_CVE_2017_5715``: Enables the security workaround for
+   `CVE-2017-5715`_. Defaults to 1.
+
 CPU Errata Workarounds
 ----------------------
 
@@ -142,6 +151,7 @@ architecture that can be enabled by the platform as desired.
 
 *Copyright (c) 2014-2016, ARM Limited and Contributors. All rights reserved.*
 
+.. _CVE-2017-5715: http://www.cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2017-5715
 .. _Cortex-A53 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm048406/Cortex_A53_MPCore_Software_Developers_Errata_Notice.pdf
 .. _Cortex-A57 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm049219/cortex_a57_mpcore_software_developers_errata_notice.pdf
 .. _Cortex-A72 MPCore Software Developers Errata Notice: http://infocenter.arm.com/help/topic/com.arm.doc.epm012079/index.html
index 63a0fa770f6fcf1611dc6083e336b3bb1eae2005..defd4a24e3529d48d3036feba0c405a0d44a3c9f 100644 (file)
@@ -13,7 +13,7 @@
        /*
         * Helper macro to initialise EL3 registers we care about.
         */
-       .macro el3_arch_init_common _exception_vectors
+       .macro el3_arch_init_common
        /* ---------------------------------------------------------------------
         * SCTLR_EL3 has already been initialised - read current value before
         * modifying.
        bl      init_cpu_data_ptr
 #endif /* IMAGE_BL31 */
 
-       /* ---------------------------------------------------------------------
-        * Set the exception vectors.
-        * ---------------------------------------------------------------------
-        */
-       adr     x0, \_exception_vectors
-       msr     vbar_el3, x0
-       isb
-
        /* ---------------------------------------------------------------------
         * Initialise SCR_EL3, setting all fields rather than relying on hw.
         * All fields are architecturally UNKNOWN on reset. The following fields
        do_cold_boot:
        .endif /* _warm_boot_mailbox */
 
+       /* ---------------------------------------------------------------------
+        * Set the exception vectors.
+        * ---------------------------------------------------------------------
+        */
+       adr     x0, \_exception_vectors
+       msr     vbar_el3, x0
+       isb
+
        /* ---------------------------------------------------------------------
         * It is a cold boot.
         * Perform any processor specific actions upon reset e.g. cache, TLB
         */
        bl      reset_handler
 
-       el3_arch_init_common \_exception_vectors
+       el3_arch_init_common
 
        .if \_secondary_cold_boot
                /* -------------------------------------------------------------
index a720e984ae9cc132f880df618d74e3cfab2fc65b..683be47e5203dacb439782e354ceaab12425ca09 100644 (file)
@@ -383,6 +383,11 @@ func cortex_a57_reset_func
        bl      errata_a57_859972_wa
 #endif
 
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+       adr     x0, workaround_mmu_runtime_exceptions
+       msr     vbar_el3, x0
+#endif
+
        /* ---------------------------------------------
         * Enable the SMP bit.
         * ---------------------------------------------
index b0341256568de8c0d0af749a74ce32d824cea9db..93821b7493f3e4b192a707e09ec3dc99e499bb39 100644 (file)
@@ -110,6 +110,12 @@ func cortex_a72_reset_func
        mov     x0, x18
        bl      errata_a72_859971_wa
 #endif
+
+#if IMAGE_BL31 && WORKAROUND_CVE_2017_5715
+       adr     x0, workaround_mmu_runtime_exceptions
+       msr     vbar_el3, x0
+#endif
+
        /* ---------------------------------------------
         * Enable the SMP bit.
         * ---------------------------------------------
diff --git a/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S b/lib/cpus/aarch64/workaround_cve_2017_5715_mmu.S
new file mode 100644 (file)
index 0000000..f478148
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, ARM Limited and Contributors. All rights reserved.
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ */
+
+#include <arch.h>
+#include <asm_macros.S>
+#include <context.h>
+
+	.globl	workaround_mmu_runtime_exceptions
+
+vector_base workaround_mmu_runtime_exceptions	/* per-cpu EL3 vector table installed by the A57/A72 reset handlers */
+
+	.macro	apply_workaround		/* invalidate the BTB by toggling the EL3 MMU (CVE-2017-5715) */
+	stp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]	/* save x0/x1 to the context GP-reg slots */
+	mrs	x0, sctlr_el3			/* x0 = original SCTLR_EL3 to restore below */
+	/* Disable MMU */
+	bic	x1, x0, #SCTLR_M_BIT
+	msr	sctlr_el3, x1
+	isb					/* ensure the MMU disable takes effect before re-enabling */
+	/* Restore MMU config */
+	msr	sctlr_el3, x0
+	isb					/* disable/enable sequence invalidates the BTB on A57/A72 */
+	ldp	x0, x1, [sp, #CTX_GPREGS_OFFSET + CTX_GPREG_X0]	/* restore clobbered x0/x1 */
+	.endm
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_EL0 : 0x0 - 0x200
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_sp_el0
+	b	sync_exception_sp_el0		/* branch straight to the main vector table entry */
+	check_vector_size workaround_mmu_sync_exception_sp_el0
+
+vector_entry workaround_mmu_irq_sp_el0
+	b	irq_sp_el0
+	check_vector_size workaround_mmu_irq_sp_el0
+
+vector_entry workaround_mmu_fiq_sp_el0
+	b	fiq_sp_el0
+	check_vector_size workaround_mmu_fiq_sp_el0
+
+vector_entry workaround_mmu_serror_sp_el0
+	b	serror_sp_el0
+	check_vector_size workaround_mmu_serror_sp_el0
+
+	/* ---------------------------------------------------------------------
+	 * Current EL with SP_ELx: 0x200 - 0x400
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_sp_elx
+	b	sync_exception_sp_elx
+	check_vector_size workaround_mmu_sync_exception_sp_elx
+
+vector_entry workaround_mmu_irq_sp_elx
+	b	irq_sp_elx
+	check_vector_size workaround_mmu_irq_sp_elx
+
+vector_entry workaround_mmu_fiq_sp_elx
+	b	fiq_sp_elx
+	check_vector_size workaround_mmu_fiq_sp_elx
+
+vector_entry workaround_mmu_serror_sp_elx
+	b	serror_sp_elx
+	check_vector_size workaround_mmu_serror_sp_elx
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch64 : 0x400 - 0x600
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_aarch64
+	apply_workaround		/* workaround applied only on entry from a lower EL */
+	b	sync_exception_aarch64
+	check_vector_size workaround_mmu_sync_exception_aarch64
+
+vector_entry workaround_mmu_irq_aarch64
+	apply_workaround
+	b	irq_aarch64
+	check_vector_size workaround_mmu_irq_aarch64
+
+vector_entry workaround_mmu_fiq_aarch64
+	apply_workaround
+	b	fiq_aarch64
+	check_vector_size workaround_mmu_fiq_aarch64
+
+vector_entry workaround_mmu_serror_aarch64
+	apply_workaround
+	b	serror_aarch64
+	check_vector_size workaround_mmu_serror_aarch64
+
+	/* ---------------------------------------------------------------------
+	 * Lower EL using AArch32 : 0x600 - 0x800
+	 * ---------------------------------------------------------------------
+	 */
+vector_entry workaround_mmu_sync_exception_aarch32
+	apply_workaround
+	b	sync_exception_aarch32
+	check_vector_size workaround_mmu_sync_exception_aarch32
+
+vector_entry workaround_mmu_irq_aarch32
+	apply_workaround
+	b	irq_aarch32
+	check_vector_size workaround_mmu_irq_aarch32
+
+vector_entry workaround_mmu_fiq_aarch32
+	apply_workaround
+	b	fiq_aarch32
+	check_vector_size workaround_mmu_fiq_aarch32
+
+vector_entry workaround_mmu_serror_aarch32
+	apply_workaround
+	b	serror_aarch32
+	check_vector_size workaround_mmu_serror_aarch32
index 31adfb42840b50ea7a7ea6aff3eeab37ed3512fe..3ba8c1fcca5bf3fadcc3862f1183580abba4e7ce 100644 (file)
@@ -16,6 +16,8 @@ A53_DISABLE_NON_TEMPORAL_HINT ?=1
 # It is enabled by default.
 A57_DISABLE_NON_TEMPORAL_HINT  ?=1
 
+WORKAROUND_CVE_2017_5715       ?=1
+
 # Process SKIP_A57_L1_FLUSH_PWR_DWN flag
 $(eval $(call assert_boolean,SKIP_A57_L1_FLUSH_PWR_DWN))
 $(eval $(call add_define,SKIP_A57_L1_FLUSH_PWR_DWN))
@@ -28,6 +30,9 @@ $(eval $(call add_define,A53_DISABLE_NON_TEMPORAL_HINT))
 $(eval $(call assert_boolean,A57_DISABLE_NON_TEMPORAL_HINT))
 $(eval $(call add_define,A57_DISABLE_NON_TEMPORAL_HINT))
 
+# Process WORKAROUND_CVE_2017_5715 flag
+$(eval $(call assert_boolean,WORKAROUND_CVE_2017_5715))
+$(eval $(call add_define,WORKAROUND_CVE_2017_5715))
 
 # CPU Errata Build flags.
 # These should be enabled by the platform if the erratum workaround needs to be